hvm: MTRR MSRs save/restore support.
authorKeir Fraser <keir.fraser@citrix.com>
Wed, 12 Dec 2007 10:25:18 +0000 (10:25 +0000)
committerKeir Fraser <keir.fraser@citrix.com>
Wed, 12 Dec 2007 10:25:18 +0000 (10:25 +0000)
Signed-off-by: Disheng Su <disheng.su@intel.com>
xen/arch/x86/hvm/mtrr.c
xen/include/asm-x86/mtrr.h
xen/include/public/arch-x86/hvm/save.h

index a817bd90218497b5523721b43296d8a1af098f55..cfa8f9038969f783c73c5e482dd98d903f4a4e61 100644 (file)
@@ -769,3 +769,83 @@ int32_t hvm_set_mem_pinned_cacheattr(
 
     return 0;
 }
+
+/*
+ * Save per-vcpu MTRR and PAT MSR state of domain @d into the HVM save
+ * context @h.  One MTRR record is written per vcpu.
+ * Returns 0 on success, 1 if a record could not be written to @h.
+ */
+static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    int i;
+    struct vcpu *v;
+    struct hvm_hw_mtrr hw_mtrr;
+    struct mtrr_state *mtrr_state;
+    /* save mtrr & pat */
+    for_each_vcpu(d, v)
+    {
+        mtrr_state = &v->arch.hvm_vcpu.mtrr;
+
+        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;
+
+        /* Reassemble MTRRdefType: type in low bits, enable flags shifted
+         * up to bit 10 (mirrors how mtrr_state splits the MSR). */
+        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
+                                | (mtrr_state->enabled << 10);
+        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;
+
+        /* Variable ranges are stored as physbase/physmask MSR pairs,
+         * flattened into one uint64_t array. */
+        for ( i = 0; i < MTRR_VCNT; i++ )
+        {
+            /* save physbase */
+            hw_mtrr.msr_mtrr_var[i*2] =
+                ((uint64_t*)mtrr_state->var_ranges)[i*2];
+            /* save physmask */
+            hw_mtrr.msr_mtrr_var[i*2+1] =
+                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
+        }
+
+        /* Fixed ranges: 8 one-byte range types packed per 64-bit MSR. */
+        for ( i = 0; i < NUM_FIXED_MSR; i++ )
+            hw_mtrr.msr_mtrr_fixed[i] =
+                ((uint64_t*)mtrr_state->fixed_ranges)[i];
+
+        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
+            return 1;
+    }
+    return 0;
+}
+
+/*
+ * Restore one vcpu's MTRR and PAT MSR state from the HVM save context @h
+ * into domain @d.  Counterpart of hvm_save_mtrr_msr().
+ * Returns 0 on success, -EINVAL on a bad vcpu id or malformed record.
+ */
+static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
+{
+    int vcpuid, i;
+    struct vcpu *v;
+    struct mtrr_state *mtrr_state;
+    struct hvm_hw_mtrr hw_mtrr;
+
+    /* Valid indices into d->vcpu[] are 0 .. MAX_VIRT_CPUS-1, so reject
+     * vcpuid == MAX_VIRT_CPUS as well ('>' would read out of bounds). */
+    vcpuid = hvm_load_instance(h);
+    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
+        return -EINVAL;
+    }
+
+    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
+        return -EINVAL;
+
+    mtrr_state = &v->arch.hvm_vcpu.mtrr;
+
+    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);
+
+    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;
+
+    for ( i = 0; i < NUM_FIXED_MSR; i++ )
+        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);
+
+    /* Variable ranges were saved as flattened physbase/physmask pairs. */
+    for ( i = 0; i < MTRR_VCNT; i++ )
+    {
+        mtrr_var_range_msr_set(mtrr_state,
+                MTRRphysBase_MSR(i), hw_mtrr.msr_mtrr_var[i*2]);
+        mtrr_var_range_msr_set(mtrr_state,
+                MTRRphysMask_MSR(i), hw_mtrr.msr_mtrr_var[i*2+1]);
+    }
+
+    /* Set MTRRdefType last, after all ranges are in place. */
+    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);
+
+    v->arch.hvm_vcpu.mtrr.is_initialized = 1;
+    return 0;
+}
+
+/* Register the MTRR save/restore pair: one record per vcpu, version 1. */
+HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
+                          1, HVMSR_PER_VCPU);
index c0a52a0a2639f2b4adafe7badefa7d600570edc7..5ad34f1650504564169edf1294d472a9edaea457 100644 (file)
@@ -47,6 +47,7 @@ struct mtrr_var_range {
 };
 
 #define NUM_FIXED_RANGES 88
+/* 88 fixed-range entries packed 8 per 64-bit MSR => 11 fixed-range MSRs. */
+#define NUM_FIXED_MSR 11
 struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
index fd77d04e9f132b21c67139b2896ccd882da85088..a2b41f03b3e87bcc89e86a9356ada18cc78fb931 100644 (file)
@@ -405,9 +405,26 @@ struct hvm_hw_pmtimer {
 
 DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
 
+/*
+ * MTRR MSRs.  One record of this type is saved/restored per vcpu.
+ * This layout is part of the migration-stream ABI — do not reorder
+ * or resize fields.
+ */
+
+struct hvm_hw_mtrr {
+/* Number of variable-range MTRR base/mask pairs carried in the record. */
+#define MTRR_VCNT 8
+/* Must stay in sync with NUM_FIXED_MSR in asm-x86/mtrr.h. */
+#define NUM_FIXED_MSR 11
+    uint64_t msr_pat_cr;
+    /* mtrr physbase & physmask msr pair */
+    uint64_t msr_mtrr_var[MTRR_VCNT*2];
+    uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
+    uint64_t msr_mtrr_cap;
+    uint64_t msr_mtrr_def_type;
+};
+
+DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
+
 /* 
  * Largest type-code in use
  */
-#define HVM_SAVE_CODE_MAX 13
+#define HVM_SAVE_CODE_MAX 14
 
 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */